Импорт всех необходимых библиотек¶
In [1]:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import PowerNorm, LogNorm
from bm3d import bm3d
from scipy.ndimage import median_filter
from scipy.ndimage import convolve
from scipy import ndimage
from skimage.filters import sobel
from skimage.metrics import structural_similarity as ssim
from pyhdf.SD import SD, SDC
import os
import math
import cv2
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
from scipy.signal import windows
from tqdm import tqdm
Функции для загрузки и визуализации данных¶
In [2]:
def load_hdf_as_numpy(file_path, var_name):
f = SD(file_path, SDC.READ)
data = f.select(var_name).get()
arr = np.array(data, dtype=np.float32)
return arr
def print_results(data_filt,
data_unf,
lower_percentile=2,
upper_percentile=98,
cmap="inferno",
norm_type="linear",
gamma=0.5):
# --- percentile-based vmin / vmax ---
all_data = np.concatenate([data_filt.ravel(), data_unf.ravel()])
diff = data_unf - data_filt
vmin = np.percentile(all_data, lower_percentile)
vmax = np.percentile(all_data, upper_percentile)
# --- choose normalization ---
if norm_type == "log":
norm = LogNorm(vmin=max(vmin, 1e-6), vmax=vmax)
elif norm_type == "power":
norm = PowerNorm(gamma=gamma, vmin=vmin, vmax=vmax)
else: # linear
norm = None
# --- plot ---
fig, axes = plt.subplots(1, 3, figsize=(18, 6))
im0 = axes[0].imshow(data_unf, cmap=cmap, vmin=vmin,
vmax=vmax, norm=norm, aspect="auto")
axes[0].set_title(f"UNFILTERED")
im1 = axes[1].imshow(data_filt, cmap=cmap, vmin=vmin,
vmax=vmax, norm=norm, aspect="auto")
axes[1].set_title(f"FILTERED")
dmin = np.percentile(diff, 1); dmax = np.percentile(diff, 99)
im2 = axes[2].imshow(diff, cmap="seismic", vmin=dmin, vmax=dmax,
norm=norm, aspect="auto")
axes[2].set_title(f"DIFFERENCE (UNFILTERED - FILTERED)")
fig.colorbar(im0, ax=axes, fraction=0.025)
# plt.tight_layout()
plt.show()
Функция, которая считает метрики между исходным и отфильтрованным изображением¶
In [3]:
def evaluate_denoising(I: np.ndarray, D: np.ndarray, eps: float = 1e-10) -> dict:
"""
I : original/noisy image (2D numpy array)
D : denoised image (2D numpy array)
Returns dictionary with:
- Noise Reduction Rate
- ENL increase
- Edge Preservation Index (EPI)
- High Frequency Energy Ratio
- Structure in residual (SSIM)
"""
I = I.astype(np.float64)
D = D.astype(np.float64)
residual = I - D
# --- 1. Noise Reduction Rate
var_I = np.var(I)
var_R = np.var(residual)
nrr = 1 - (var_R / (var_I + eps))
# --- 2. Equivalent Number of Looks (ENL)
def enl(im):
m = np.mean(im)
v = np.var(im)
return (m ** 2) / (v + eps)
enl_I = enl(I)
enl_D = enl(D)
enl_increase = (enl_D - enl_I) / (enl_I + eps)
# --- 3. Edge Preservation Index (correlation of Sobel edges)
G_I = sobel(I)
G_D = sobel(D)
epi_num = np.sum((G_I - G_I.mean()) * (G_D - G_D.mean()))
epi_den = np.sqrt(np.sum((G_I - G_I.mean())**2) * np.sum((G_D - G_D.mean())**2)) + eps
epi = epi_num / epi_den
# --- 4. High Frequency Energy ratio
def high_freq_energy(im):
low = ndimage.gaussian_filter(im, sigma=2)
high = im - low
return np.sum(high ** 2)
hf_I = high_freq_energy(I)
hf_D = high_freq_energy(D)
hf_ratio = hf_D / (hf_I + eps)
# --- 5. Structural content in residual (SSIM)
h, w = residual.shape
ssim_residual = ssim(residual, np.zeros((h, w)), data_range=residual.max() - residual.min())
return {
"Noise reduction rate (ideal 0.4–0.85)": round(float(nrr), 4),
"ENL original": round(float(enl_I), 3),
"ENL denoised": round(float(enl_D), 3),
"ENL relative increase (ideal > 1.0)": round(float(enl_increase), 4),
"Edge preservation index (ideal 0.8–1.05)": round(float(epi), 4),
"HF energy ratio (ideal 0.3–0.7)": round(float(hf_ratio), 4),
"Residual structure SSIM (ideal ≈ 0)": round(float(ssim_residual), 6),
}
Посмотрим на исходные данные¶
In [4]:
filtered_file_name = 'metm24_TA_251128_0101_9074_01_01A.hdf'
unfiltered_file_name = 'metm24_TA_251128_0101_9074_01_02A.hdf'
filtered_var_name = "m_m24_13_53_80V"
unfiltered_var_name = "m_m2413_53_80V"
data_filt = load_hdf_as_numpy(filtered_file_name, filtered_var_name)
data_unf = load_hdf_as_numpy(unfiltered_file_name, unfiltered_var_name)
print_results(data_filt, data_unf)
In [5]:
# Save images
np.save('images\\initial_data_unfiltered.npy', data_unf)
np.save('images\\initial_data_filtered.npy', data_filt)
Baseline: Median Filter¶
In [6]:
WINDOW_SIZE = 5
data_unf = load_hdf_as_numpy(unfiltered_file_name, unfiltered_var_name)
print("Image shape:", data_unf.shape)
print("Data type:", data_unf.dtype)
Image shape: (1212, 140) Data type: float32
In [7]:
filtered_image = median_filter(data_unf, size=WINDOW_SIZE)
In [8]:
print_results(filtered_image, data_unf)
In [9]:
# Save image
np.save('images\\baseline_median_data_filtered.npy', filtered_image)
Block-matching and 3D filtering (BM3D)¶
BM3D с автоматической робастной оценкой дисперсии шума
In [10]:
data_unf = load_hdf_as_numpy(unfiltered_file_name, unfiltered_var_name)
# ======================
# NORMALIZE IMAGE TO [0,1]
# ======================
min_val = np.min(data_unf)
max_val = np.max(data_unf)
image_norm = (data_unf - min_val) / (max_val - min_val + 1e-8)
print("Normalized range:", image_norm.min(), "to", image_norm.max())
# ======================
# APPLY BM3D
# ======================
# =====================
# AUTO NOISE ESTIMATION
# =====================
def estimate_noise_sigma(image):
"""
Robust noise estimation using median absolute deviation
Works best on satellite images
Returns sigma in normalized [0,1] space
"""
H = np.array([[1, -2, 1],
[-2, 4, -2],
[1, -2, 1]])
im = convolve(image, H, mode='nearest')
sigma = np.median(np.abs(im)) / 0.6745
return sigma
sigma_est = estimate_noise_sigma(image_norm)
print(f"Estimated noise sigma: {sigma_est:.5f}")
filtered_norm = bm3d(image_norm, sigma_psd=sigma_est)
# ======================
# DE-NORMALIZE BACK
# ======================
filtered_image = filtered_norm * (max_val - min_val) + min_val
Normalized range: 0.0 to 1.0 Estimated noise sigma: 0.17369
In [11]:
print_results(filtered_image, data_unf)
In [12]:
# Save images
np.save('images\\bm3d_data_filtered.npy', filtered_image)
DnCNN, NAFNet-mini и NAFNet¶
In [14]:
"""
advanced_denoise_pipeline
Integrated pipeline: DnCNN, TinyNAFNet, Full NAFNet (paper-like) with GPU support.
- Loads HDF swath (your var), self-supervised finetune if no weights, patch-based batched inference on GPU,
- Mixed precision training/inference for speed & memory efficiency,
- Visualization & no-ground-truth evaluation measures.
Usage:
- Place this file with your HDF file.
- Optionally put pretrained weights in ./weights/dncnn.pth, ./weights/tinynaf.pth, ./weights/nafnet.pth
- Run: python advanced_denoise_pipeline
"""
os.environ["CUDA_LAUNCH_BLOCKING"] = "1"
def run_experiment(model_name='dncnn'):
# -------------------------
# User / runtime parameters
# -------------------------
UNFILTERED_FILE = 'metm24_TA_251128_0101_9074_01_02A.hdf'
UNFILTERED_VAR = "m_m2413_53_80V"
WEIGHTS_DIR = "./weights"
DN_CNN_WEIGHTS = os.path.join(WEIGHTS_DIR, "dncnn.pth")
TINY_NAF_WEIGHTS = os.path.join(WEIGHTS_DIR, "tinynaf.pth")
NAFNET_WEIGHTS = os.path.join(WEIGHTS_DIR, "nafnet.pth")
# Choose model: 'dncnn', 'tinynaf', 'nafnet'
MODEL_NAME = model_name
# Patch & inference config (tunable)
PATCH_H = 128
PATCH_W = 128
STRIDE_H = PATCH_H // 2
STRIDE_W = PATCH_W // 2
BATCH_INFERENCE = 1 # number of patches processed at once during inference (GPU batched)
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# DEVICE = torch.device('cpu')
NUM_WORKERS = 0
# PIN_MEMORY = True if DEVICE.type == "cuda" else False
PIN_MEMORY= False
torch.set_num_threads(4)
# Finetune config (self-supervised) - small on GPU
FINETUNE = True # set False if you have supervised weights
FINETUNE_EPOCHS = 60
FINETUNE_BATCH = 16
LEARNING_RATE = 1e-3
# Mixed-precision
USE_AMP = True if DEVICE.type == "cuda" else False
# USE_AMP = False
# -------------------------
# Utilities
# -------------------------
# Можно это раскомментировать если надо считывать из hdf,
# но у меня не получилось сделать так, чтобы и pyhdf работал, и
# pytorch с cuda GPU работал (что-то одно отваливается). Поэтому
# тут и далее внизу читаем и сохраняем в npy нормальный формат :)
# def load_hdf_as_numpy(file_path, var_name):
# f = SD(file_path, SDC.READ)
# data = f.select(var_name).get()
# arr = np.array(data, dtype=np.float32)
# return arr
def make_hanning_window(h, w):
win_h = windows.hann(h, sym=False)
win_w = windows.hann(w, sym=False)
win2d = np.outer(win_h, win_w).astype(np.float32)
win2d += 1e-6
return win2d
# -------------------------
# Models
# -------------------------
# 1) DnCNN (small)
class DnCNN(nn.Module):
def __init__(self, in_channels=1, features=64, depth=12):
super().__init__()
layers = [nn.Conv2d(in_channels, features, 3, padding=1), nn.ReLU(inplace=True)]
for _ in range(depth - 2):
layers += [nn.Conv2d(features, features, 3, padding=1, bias=False),
nn.BatchNorm2d(features),
nn.ReLU(inplace=True)]
layers += [nn.Conv2d(features, in_channels, 3, padding=1)]
self.net = nn.Sequential(*layers)
def forward(self, x):
# noise = self.net(x)
# return x - noise
return self.net(x)
# 2) Tiny NAF-style (light)
class SimpleNAFBlock(nn.Module):
def __init__(self, channels):
super().__init__()
self.conv1 = nn.Conv2d(channels, channels, 1)
self.conv2 = nn.Conv2d(channels, channels, 3, padding=1)
self.conv3 = nn.Conv2d(channels, channels, 1)
self.act = nn.GELU()
self.beta = nn.Parameter(torch.zeros(1, channels, 1, 1))
def forward(self, x):
y = self.conv1(x)
y = self.conv2(y)
y = self.act(y)
y = self.conv3(y)
return x + self.beta * y
class TinyNAFNet(nn.Module):
def __init__(self, in_ch=1, out_ch=1, width=32, num_blocks=6):
super().__init__()
self.entry = nn.Conv2d(in_ch, width, 3, padding=1)
self.body = nn.Sequential(*[SimpleNAFBlock(width) for _ in range(num_blocks)])
self.exit = nn.Conv2d(width, out_ch, 3, padding=1)
def forward(self, x):
fe = self.entry(x)
fb = self.body(fe)
out = self.exit(fb)
# return x - out
return out
# 3) Full paper-like NAFNet (encoder-decoder U-Net style)
# Implement core building blocks inspired by the NAFNet paper:
# NAF building blocks (faithful but practical)
class SimpleGate(nn.Module):
def forward(self, x):
a, b = x.chunk(2, dim=1)
return a * b
# Helper: LayerNorm2d (standard in NAFNet)
class LayerNorm2d(nn.Module):
def __init__(self, channels, eps=1e-6):
super().__init__()
self.weight = nn.Parameter(torch.ones(1, channels, 1, 1))
self.bias = nn.Parameter(torch.zeros(1, channels, 1, 1))
self.eps = eps
def forward(self, x):
mean = x.mean(dim=(1, 2, 3), keepdim=True)
var = x.var(dim=(1, 2, 3), keepdim=True)
x = (x - mean) / (var + self.eps).sqrt()
return x * self.weight + self.bias
class SimpleChannelAttention(nn.Module):
def __init__(self, channels):
super().__init__()
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.conv = nn.Conv2d(channels, channels, 1, bias=True)
def forward(self, x):
v = self.avg_pool(x)
v = self.conv(v)
return x * torch.sigmoid(v)
class NAFBlockV2(nn.Module):
def __init__(self, channels):
super().__init__()
# First PW -> DW -> SimpleGate -> PW
self.pw1 = nn.Conv2d(channels, channels * 2, 1, bias=True)
self.dw = nn.Conv2d(channels * 2, channels * 2, 3, padding=1, groups=channels * 2, bias=True)
self.sg = SimpleGate()
self.pw2 = nn.Conv2d(channels, channels, 1, bias=True)
# LayerNorm instead of SCA (matches NAFNet practice)
# self.norm = LayerNorm2d(channels)
self.norm = nn.GroupNorm(1, channels)
# FFN: similar structure, but no GELU in main path; GELU only in FFN if needed
self.ffn1 = nn.Conv2d(channels, channels * 2, 1, bias=True)
self.ffn_dw = nn.Conv2d(channels * 2, channels * 2, 3, padding=1, groups=channels * 2, bias=True)
self.ffn_sg = SimpleGate() # use gate instead of GELU
self.ffn2 = nn.Conv2d(channels, channels, 1, bias=True)
# Learnable residuals
self.beta = nn.Parameter(torch.zeros(1, channels, 1, 1))
self.gamma = nn.Parameter(torch.zeros(1, channels, 1, 1))
def forward(self, x):
identity = x
# Main branch
y = self.pw1(x)
y = self.dw(y)
y = self.sg(y)
y = self.pw2(y)
y = self.norm(y)
x = identity + self.beta * y
# FFN branch
identity2 = x
z = self.ffn1(x)
z = self.ffn_dw(z)
z = self.ffn_sg(z) # gated, no GELU
z = self.ffn2(z)
x = identity2 + self.gamma * z
return x
class NAFNet(nn.Module):
def __init__(self, img_channels=1, width=48, enc_depths=[2,2,4], middle_blocks=8):
super().__init__()
self.entry = nn.Conv2d(img_channels, width, 3, padding=1)
# encoder
self.encs = nn.ModuleList()
self.downs = nn.ModuleList()
ch = width
for d in enc_depths:
blocks = nn.Sequential(*[NAFBlockV2(ch) for _ in range(d)])
self.encs.append(blocks)
self.downs.append(nn.Conv2d(ch, ch*2, 2, stride=2)) # downsample by strided conv
ch *= 2
# middle
self.middle = nn.Sequential(*[NAFBlockV2(ch) for _ in range(middle_blocks)])
# decoder
self.ups = nn.ModuleList()
self.decs = nn.ModuleList()
for d in reversed(enc_depths):
self.ups.append(nn.ConvTranspose2d(ch, ch//2, 2, stride=2))
ch = ch // 2
self.decs.append(nn.Sequential(*[NAFBlockV2(ch) for _ in range(d)]))
# final conv MUST output single channel
self.exit = nn.Conv2d(width, img_channels, 3, padding=1)
def forward(self, x):
identity = x
x = self.entry(x)
enc_feats = []
for enc, down in zip(self.encs, self.downs):
x = enc(x)
enc_feats.append(x)
x = down(x)
x = self.middle(x)
for up, dec, feat in zip(self.ups, self.decs, reversed(enc_feats)):
x = up(x)
# align shapes if needed
if x.shape[2:] != feat.shape[2:]:
# center-crop to min dims
min_h = min(x.shape[2], feat.shape[2])
min_w = min(x.shape[3], feat.shape[3])
x = x[:, :, :min_h, :min_w]
feat = feat[:, :, :min_h, :min_w]
x = x + feat
x = dec(x)
out = self.exit(x)
# return identity - out # residual subtract -> final shape (B,1,H,W)
return out
# -------------------------
# Dataset for self-supervised finetuning
# -------------------------
class ImagePatchDataset(Dataset):
def __init__(self, image, patch_h=128, patch_w=128, num_samples=4000, mask_ratio=0.05):
self.image = image.astype(np.float32)
self.H, self.W = image.shape
self.ph = patch_h
self.pw = patch_w
self.num_samples = num_samples
self.mask_ratio = mask_ratio
self.coords = [ (np.random.randint(0, max(1, self.H - self.ph + 1)),
np.random.randint(0, max(1, self.W - self.pw + 1))) for _ in range(num_samples) ]
def __len__(self):
return self.num_samples
def __getitem__(self, idx):
y, x = self.coords[idx]
patch = self.image[y:y+self.ph, x:x+self.pw].copy()
# create mask
mask = np.zeros_like(patch, dtype=np.float32)
num_mask = int(self.mask_ratio * patch.size)
if num_mask > 0:
coords = np.random.choice(patch.size, size=num_mask, replace=False)
flat = mask.ravel()
flat[coords] = 1.0
mask = flat.reshape(patch.shape)
masked_patch = patch.copy()
masked_patch[mask.astype(bool)] = np.mean(patch)
# normalize per-patch
mean = masked_patch.mean()
std = masked_patch.std() if masked_patch.std() > 1e-6 else 1.0
inp = (masked_patch - mean) / std
tgt = (patch - mean) / std
# inp_t = torch.from_numpy(inp[None]).float() # (1,H,W)
inp_t = torch.from_numpy(masked_patch[None]).float()
# tgt_t = torch.from_numpy(tgt[None]).float()
tgt_t = torch.from_numpy(patch[None]).float()
mask_t = torch.from_numpy(mask[None]).float()
return inp_t, tgt_t, mask_t
# -------------------------
# Finetune function (self-supervised using masked loss)
# -------------------------
def finetune_selfsupervised(model, image, epochs=5, batch_size=16, lr=1e-4, device=DEVICE):
ds = ImagePatchDataset(image, patch_h=128, patch_w=128, num_samples=3000, mask_ratio=0.05)
dl = DataLoader(ds, batch_size=batch_size, shuffle=True, num_workers=NUM_WORKERS, pin_memory=PIN_MEMORY)
model.train()
opt = torch.optim.AdamW(model.parameters(), lr=lr, weight_decay=1e-6)
scaler = torch.amp.GradScaler('cuda',enabled=USE_AMP)
loss_fn = nn.L1Loss(reduction='none')
for ep in range(epochs):
running = 0.0
cnt = 0
pbar = tqdm(dl, desc=f"Finetune ep{ep+1}/{epochs}")
for inp, tgt, mask in pbar:
inp = inp.to(device); tgt = tgt.to(device); mask = mask.to(device)
with torch.amp.autocast('cuda', enabled=USE_AMP):
out = model(inp)
l = loss_fn(out, tgt)
masked_loss = (l * mask).sum() / (mask.sum() + 1e-8)
opt.zero_grad()
scaler.scale(masked_loss).backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)
scaler.step(opt)
scaler.update()
running += masked_loss.item()
cnt += 1
pbar.set_postfix({"avg_masked_loss": running/cnt})
print(f"Epoch {ep+1} avg masked loss: {running/cnt:.6f}")
torch.cuda.empty_cache()
model.eval()
return model
# -------------------------
# Patching & batched inference (memory-safe)
# -------------------------
def extract_patches(image, ph, pw, stride_h, stride_w):
H, W = image.shape
pad_h = (ph - (H % ph)) % ph
pad_w = (pw - (W % pw)) % pw
image_padded = np.pad(image, ((0, pad_h), (0, pad_w)), mode='reflect') if (pad_h or pad_w) else image.copy()
Hp, Wp = image_padded.shape
ys = list(range(0, Hp - ph + 1, stride_h))
xs = list(range(0, Wp - pw + 1, stride_w))
if ys[-1] != Hp - ph:
ys.append(Hp - ph)
if xs[-1] != Wp - pw:
xs.append(Wp - pw)
coords = []
for y in ys:
for x in xs:
coords.append((y, x))
return image_padded, coords
def batched_patch_inference(image, model, ph, pw, stride_h, stride_w, device, batch_size=2):
model.eval()
torch.cuda.empty_cache()
win = make_hanning_window(ph, pw)
image_padded, coords = extract_patches(image, ph, pw, stride_h, stride_w)
Hp, Wp = image_padded.shape
output = np.zeros_like(image_padded, dtype=np.float32)
weight = np.zeros_like(image_padded, dtype=np.float32)
num = len(coords)
idx = 0
print(f"Processing patches: {num}")
with torch.no_grad():
while idx < num:
# torch.cuda.synchronize()
bs = min(batch_size, num - idx)
inp_list = []
# means = []
# stds = []
for j in range(bs):
y, x = coords[idx + j]
patch = image_padded[y:y+ph, x:x+pw].astype(np.float32)
# m = patch.mean(); s = patch.std() if patch.std() > 1e-6 else 1.0
# pn = (patch - m) / s
pn = patch
inp_list.append(pn)
# means.append(m); stds.append(s)
inp_batch = np.stack(inp_list, axis=0)[:, None, :, :] # (B,1,H,W)
inp_t = torch.from_numpy(inp_batch).to(device).float()
if USE_AMP and device.type == 'cuda':
torch.cuda.empty_cache()
with torch.amp.autocast('cuda'):
out = model(inp_t)
else:
out = model(inp_t)
out_np = out.detach().cpu().numpy()[:, 0, :, :] # (B,H,W)
# accumulate
for j in range(bs):
y, x = coords[idx + j]
# patch_out = out_np[j] * stds[j] + means[j]
patch_out = out_np[j]
output[y:y+ph, x:x+pw] += patch_out * win
weight[y:y+ph, x:x+pw] += win
idx += bs
H, W = image.shape
denoised = output[:H, :W] / (weight[:H, :W] + 1e-8)
return denoised
def sliding_window_inference(image, model, patch_h, patch_w, stride_h, stride_w, device):
"""
image: 2D numpy (H, W), float32 in original units
model: torch model mapping [1,1,H,W] -> [1,1,H,W] (denoised)
returns: denoised image (H, W)
"""
model.eval()
H, W = image.shape
pad_h = (patch_h - (H % patch_h)) % patch_h
pad_w = (patch_w - (W % patch_w)) % patch_w
if pad_h > 0 or pad_w > 0:
image_padded = np.pad(image, ((0, pad_h), (0, pad_w)), mode='reflect')
else:
image_padded = image
Hp, Wp = image_padded.shape
# precompute window
win = make_hanning_window(patch_h, patch_w)
# accumulators
output = np.zeros_like(image_padded, dtype=np.float32)
weight = np.zeros_like(image_padded, dtype=np.float32)
ys = list(range(0, Hp - patch_h + 1, stride_h))
xs = list(range(0, Wp - patch_w + 1, stride_w))
# ensure last patch covers end
if ys[-1] != Hp - patch_h:
ys.append(Hp - patch_h)
if xs[-1] != Wp - patch_w:
xs.append(Wp - patch_w)
for y in ys:
for x in xs:
patch = image_padded[y:y+patch_h, x:x+patch_w]
# normalize patch to zero-mean unit-std (helps network generalize)
mean = patch.mean()
std = patch.std() if patch.std() > 1e-6 else 1.0
patch_n = (patch - mean) / std
inp = torch.from_numpy(patch_n[None, None, :, :]).to(device)
with torch.no_grad():
out = model(inp).cpu().numpy()[0, 0]
# undo normalization
out = out * std + mean
# accumulate with window
output[y:y+patch_h, x:x+patch_w] += out * win
weight[y:y+patch_h, x:x+patch_w] += win
# normalize by weight and crop back
output = output[:H, :W] / (weight[:H, :W] + 1e-8)
return output
# -------------------------
# No-GT evaluation helpers
# -------------------------
def noise_reduction_rate(I, D):
return 1 - (np.var(D) / (np.var(I) + 1e-12))
def edge_preservation_index(I, D):
from scipy import ndimage
sx = ndimage.sobel(I, axis=1)
sy = ndimage.sobel(I, axis=0)
sI = np.hypot(sx, sy)
sx2 = ndimage.sobel(D, axis=1)
sy2 = ndimage.sobel(D, axis=0)
sD = np.hypot(sx2, sy2)
return sD.sum() / (sI.sum() + 1e-12)
# -------------------------
# Main runner
# -------------------------
def run_pipeline():
# 1) load image
# image = load_hdf_as_numpy(UNFILTERED_FILE, UNFILTERED_VAR)
# np.save('image_unf.npy', image)
image_raw = np.load('images\\initial_data_unfiltered.npy')
H, W = image_raw.shape
print("Loaded image:", image_raw.shape, " dtype:", image_raw.dtype)
print("Using device:", DEVICE, "AMP enabled:", USE_AMP)
glob_mean = np.mean(image_raw)
glob_std = np.std(image_raw)
if glob_std < 1e-6: glob_std= 1.0
image = (image_raw - glob_mean) / (glob_std + 1e-8)
# 2) choose model & try loading weights
if MODEL_NAME.lower() == 'dncnn':
model = DnCNN(in_channels=1, features=64, depth=12)
wpath = DN_CNN_WEIGHTS
elif MODEL_NAME.lower() == 'tinynaf':
model = TinyNAFNet(in_ch=1, out_ch=1, width=32, num_blocks=8)
wpath = TINY_NAF_WEIGHTS
elif MODEL_NAME.lower() == 'nafnet':
# a moderately-sized NAFNet for 12GB GPU
model = NAFNet(img_channels=1, width=48, enc_depths=[2,2,4], middle_blocks=8)
wpath = NAFNET_WEIGHTS
else:
raise ValueError("Unknown model")
model = model.to(DEVICE)
pretrained = False
if os.path.exists(wpath):
try:
model.load_state_dict(torch.load(wpath, map_location=DEVICE))
print("Loaded pretrained weights from", wpath)
pretrained = True
except Exception as e:
print("Failed loading weights:", e)
# 3) finetune self-supervised if no pretrained
if (not pretrained) and FINETUNE:
print("No pretrained weights found — running self-supervised finetuning")
model = finetune_selfsupervised(model, image, epochs=FINETUNE_EPOCHS, batch_size=FINETUNE_BATCH, lr=LEARNING_RATE, device=DEVICE)
os.makedirs(WEIGHTS_DIR, exist_ok=True)
save_path = os.path.join(WEIGHTS_DIR, f"{MODEL_NAME}_finetuned.pth")
torch.save(model.state_dict(), save_path)
print("Saved finetuned weights to", save_path)
# 4) batched patch inference on GPU/CPU
print("Running patch-based batched inference ...")
torch.backends.cudnn.enabled = False
torch.backends.cudnn.deterministic = True
denoised_norm = batched_patch_inference(image, model, PATCH_H, PATCH_W, STRIDE_H, STRIDE_W, DEVICE, batch_size=BATCH_INFERENCE)
denoised = denoised_norm * glob_std + glob_mean
# denoised = sliding_window_inference(image, model, PATCH_H, PATCH_W, STRIDE_H, STRIDE_W, DEVICE)
image = image_raw
# 5) visualize
diff = image - denoised
vmin = np.percentile(image, 1)
vmax = np.percentile(image, 99)
plt.figure(figsize=(18,6))
plt.subplot(1,3,1)
plt.imshow(image, cmap='turbo', vmin=vmin, vmax=vmax, aspect='auto')
plt.title("Original (Unfiltered)")
plt.axis('off')
plt.subplot(1,3,2)
plt.imshow(denoised, cmap='turbo', vmin=vmin, vmax=vmax, aspect='auto')
plt.title(f"Denoised ({MODEL_NAME})")
plt.axis('off')
plt.subplot(1,3,3)
dmin = np.percentile(diff, 1); dmax = np.percentile(diff, 99)
plt.imshow(diff, cmap='seismic', vmin=dmin, vmax=dmax, aspect='auto')
plt.title("Difference (Original - Denoised)")
plt.axis('off')
plt.tight_layout()
plt.show()
# 6) metrics (no ground truth)
nrr = noise_reduction_rate(image, denoised)
epi = edge_preservation_index(image, denoised)
print(f"Noise reduction rate: {nrr:.4f}")
print(f"Edge preservation index: {epi:.4f}")
# 7) save output
outname = f"images\\denoised_{MODEL_NAME}.npy"
np.save(outname, denoised)
print("Saved denoised image to", outname)
run_pipeline()
In [15]:
run_experiment(model_name='dncnn')
Loaded image: (1212, 140) dtype: float32 Using device: cuda AMP enabled: True No pretrained weights found — running self-supervised finetuning
Finetune ep1/60: 100%|████████████████████████████████████████| 188/188 [00:10<00:00, 18.67it/s, avg_masked_loss=0.566]
Epoch 1 avg masked loss: 0.566199
Finetune ep2/60: 100%|████████████████████████████████████████| 188/188 [00:09<00:00, 18.91it/s, avg_masked_loss=0.516]
Epoch 2 avg masked loss: 0.515835
Finetune ep3/60: 100%|████████████████████████████████████████| 188/188 [00:09<00:00, 18.89it/s, avg_masked_loss=0.463]
Epoch 3 avg masked loss: 0.462995
Finetune ep4/60: 100%|████████████████████████████████████████| 188/188 [00:10<00:00, 18.78it/s, avg_masked_loss=0.429]
Epoch 4 avg masked loss: 0.428673
Finetune ep5/60: 100%|████████████████████████████████████████| 188/188 [00:10<00:00, 18.68it/s, avg_masked_loss=0.405]
Epoch 5 avg masked loss: 0.405171
Finetune ep6/60: 100%|████████████████████████████████████████| 188/188 [00:09<00:00, 18.94it/s, avg_masked_loss=0.386]
Epoch 6 avg masked loss: 0.385711
Finetune ep7/60: 100%|████████████████████████████████████████| 188/188 [00:09<00:00, 18.99it/s, avg_masked_loss=0.367]
Epoch 7 avg masked loss: 0.366895
Finetune ep8/60: 100%|████████████████████████████████████████| 188/188 [00:09<00:00, 19.38it/s, avg_masked_loss=0.353]
Epoch 8 avg masked loss: 0.353387
Finetune ep9/60: 100%|████████████████████████████████████████| 188/188 [00:09<00:00, 19.31it/s, avg_masked_loss=0.346]
Epoch 9 avg masked loss: 0.346006
Finetune ep10/60: 100%|███████████████████████████████████████| 188/188 [00:09<00:00, 19.31it/s, avg_masked_loss=0.333]
Epoch 10 avg masked loss: 0.333385
Finetune ep11/60: 100%|███████████████████████████████████████| 188/188 [00:09<00:00, 19.28it/s, avg_masked_loss=0.323]
Epoch 11 avg masked loss: 0.323212
Finetune ep12/60: 100%|███████████████████████████████████████| 188/188 [00:09<00:00, 19.33it/s, avg_masked_loss=0.301]
Epoch 12 avg masked loss: 0.300533
Finetune ep13/60: 100%|███████████████████████████████████████| 188/188 [00:09<00:00, 19.24it/s, avg_masked_loss=0.297]
Epoch 13 avg masked loss: 0.297353
Finetune ep14/60: 100%|███████████████████████████████████████| 188/188 [00:09<00:00, 19.06it/s, avg_masked_loss=0.295]
Epoch 14 avg masked loss: 0.295431
Finetune ep15/60: 100%|███████████████████████████████████████| 188/188 [00:09<00:00, 19.01it/s, avg_masked_loss=0.291]
Epoch 15 avg masked loss: 0.291277
Finetune ep16/60: 100%|███████████████████████████████████████| 188/188 [00:09<00:00, 19.04it/s, avg_masked_loss=0.289]
Epoch 16 avg masked loss: 0.289310
Finetune ep17/60: 100%|███████████████████████████████████████| 188/188 [00:10<00:00, 18.77it/s, avg_masked_loss=0.286]
Epoch 17 avg masked loss: 0.286080
Finetune ep18/60: 100%|███████████████████████████████████████| 188/188 [00:09<00:00, 18.83it/s, avg_masked_loss=0.285]
Epoch 18 avg masked loss: 0.284657
Finetune ep19/60: 100%|███████████████████████████████████████| 188/188 [00:09<00:00, 18.85it/s, avg_masked_loss=0.281]
Epoch 19 avg masked loss: 0.280540
Finetune ep20/60: 100%|████████████████████████████████████████| 188/188 [00:09<00:00, 19.11it/s, avg_masked_loss=0.28]
Epoch 20 avg masked loss: 0.280080
Finetune ep21/60: 100%|████████████████████████████████████████| 188/188 [00:09<00:00, 19.15it/s, avg_masked_loss=0.28]
Epoch 21 avg masked loss: 0.279532
Finetune ep22/60: 100%|████████████████████████████████████████| 188/188 [00:09<00:00, 19.05it/s, avg_masked_loss=0.27]
Epoch 22 avg masked loss: 0.270020
Finetune ep23/60: 100%|███████████████████████████████████████| 188/188 [00:09<00:00, 19.02it/s, avg_masked_loss=0.263]
Epoch 23 avg masked loss: 0.263083
Finetune ep24/60: 100%|███████████████████████████████████████| 188/188 [00:09<00:00, 19.01it/s, avg_masked_loss=0.262]
Epoch 24 avg masked loss: 0.261824
Finetune ep25/60: 100%|███████████████████████████████████████| 188/188 [00:09<00:00, 19.00it/s, avg_masked_loss=0.261]
Epoch 25 avg masked loss: 0.261043
Finetune ep26/60: 100%|████████████████████████████████████████| 188/188 [00:09<00:00, 19.12it/s, avg_masked_loss=0.26]
Epoch 26 avg masked loss: 0.260394
Finetune ep27/60: 100%|████████████████████████████████████████| 188/188 [00:09<00:00, 19.13it/s, avg_masked_loss=0.26]
Epoch 27 avg masked loss: 0.260134
Finetune ep28/60: 100%|███████████████████████████████████████| 188/188 [00:09<00:00, 19.14it/s, avg_masked_loss=0.258]
Epoch 28 avg masked loss: 0.258419
Finetune ep29/60: 100%|███████████████████████████████████████| 188/188 [00:09<00:00, 18.99it/s, avg_masked_loss=0.258]
Epoch 29 avg masked loss: 0.258014
Finetune ep30/60: 100%|███████████████████████████████████████| 188/188 [00:09<00:00, 19.14it/s, avg_masked_loss=0.257]
Epoch 30 avg masked loss: 0.256908
Finetune ep31/60: 100%|███████████████████████████████████████| 188/188 [00:09<00:00, 19.10it/s, avg_masked_loss=0.256]
Epoch 31 avg masked loss: 0.255564
Finetune ep32/60: 100%|███████████████████████████████████████| 188/188 [00:09<00:00, 19.10it/s, avg_masked_loss=0.257]
Epoch 32 avg masked loss: 0.257135
Finetune ep33/60: 100%|███████████████████████████████████████| 188/188 [00:09<00:00, 19.07it/s, avg_masked_loss=0.256]
Epoch 33 avg masked loss: 0.256209
Finetune ep34/60: 100%|███████████████████████████████████████| 188/188 [00:09<00:00, 19.10it/s, avg_masked_loss=0.256]
Epoch 34 avg masked loss: 0.255791
Finetune ep35/60: 100%|███████████████████████████████████████| 188/188 [00:10<00:00, 18.46it/s, avg_masked_loss=0.262]
Epoch 35 avg masked loss: 0.262421
Finetune ep36/60: 100%|███████████████████████████████████████| 188/188 [00:10<00:00, 18.27it/s, avg_masked_loss=0.271]
Epoch 36 avg masked loss: 0.271137
Finetune ep37/60: 100%|███████████████████████████████████████| 188/188 [00:10<00:00, 18.33it/s, avg_masked_loss=0.269]
Epoch 37 avg masked loss: 0.269441
Finetune ep38/60: 100%|███████████████████████████████████████| 188/188 [00:10<00:00, 18.17it/s, avg_masked_loss=0.267]
Epoch 38 avg masked loss: 0.267018
Finetune ep39/60: 100%|███████████████████████████████████████| 188/188 [00:10<00:00, 18.16it/s, avg_masked_loss=0.264]
Epoch 39 avg masked loss: 0.264344
Finetune ep40/60: 100%|███████████████████████████████████████| 188/188 [00:10<00:00, 18.11it/s, avg_masked_loss=0.262]
Epoch 40 avg masked loss: 0.262361
Finetune ep41/60: 100%|███████████████████████████████████████| 188/188 [00:10<00:00, 18.07it/s, avg_masked_loss=0.265]
Epoch 41 avg masked loss: 0.264808
Finetune ep42/60: 100%|████████████████████████████████████████| 188/188 [00:10<00:00, 18.16it/s, avg_masked_loss=0.26]
Epoch 42 avg masked loss: 0.259634
Finetune ep43/60: 100%|███████████████████████████████████████| 188/188 [00:10<00:00, 18.15it/s, avg_masked_loss=0.261]
Epoch 43 avg masked loss: 0.260978
Finetune ep44/60: 100%|███████████████████████████████████████| 188/188 [00:10<00:00, 18.32it/s, avg_masked_loss=0.258]
Epoch 44 avg masked loss: 0.257982
Finetune ep45/60: 100%|███████████████████████████████████████| 188/188 [00:10<00:00, 18.31it/s, avg_masked_loss=0.258]
Epoch 45 avg masked loss: 0.257884
Finetune ep46/60: 100%|███████████████████████████████████████| 188/188 [00:10<00:00, 18.21it/s, avg_masked_loss=0.247]
Epoch 46 avg masked loss: 0.247034
Finetune ep47/60: 100%|███████████████████████████████████████| 188/188 [00:10<00:00, 18.30it/s, avg_masked_loss=0.244]
Epoch 47 avg masked loss: 0.243581
Finetune ep48/60: 100%|███████████████████████████████████████| 188/188 [00:10<00:00, 18.28it/s, avg_masked_loss=0.252]
Epoch 48 avg masked loss: 0.252193
Finetune ep49/60: 100%|███████████████████████████████████████| 188/188 [00:10<00:00, 18.25it/s, avg_masked_loss=0.254]
Epoch 49 avg masked loss: 0.253541
Finetune ep50/60: 100%|███████████████████████████████████████| 188/188 [00:10<00:00, 18.10it/s, avg_masked_loss=0.256]
Epoch 50 avg masked loss: 0.255631
Finetune ep51/60: 100%|███████████████████████████████████████| 188/188 [00:10<00:00, 18.06it/s, avg_masked_loss=0.253]
Epoch 51 avg masked loss: 0.252504
Finetune ep52/60: 100%|███████████████████████████████████████| 188/188 [00:10<00:00, 18.10it/s, avg_masked_loss=0.253]
Epoch 52 avg masked loss: 0.252895
Finetune ep53/60: 100%|███████████████████████████████████████| 188/188 [00:10<00:00, 18.04it/s, avg_masked_loss=0.253]
Epoch 53 avg masked loss: 0.253318
Finetune ep54/60: 100%|███████████████████████████████████████| 188/188 [00:10<00:00, 18.06it/s, avg_masked_loss=0.253]
Epoch 54 avg masked loss: 0.253153
Finetune ep55/60: 100%|████████████████████████████████████████| 188/188 [00:10<00:00, 18.26it/s, avg_masked_loss=0.25]
Epoch 55 avg masked loss: 0.249765
Finetune ep56/60: 100%|████████████████████████████████████████| 188/188 [00:10<00:00, 18.38it/s, avg_masked_loss=0.25]
Epoch 56 avg masked loss: 0.249837
Finetune ep57/60: 100%|████████████████████████████████████████| 188/188 [00:10<00:00, 18.38it/s, avg_masked_loss=0.25]
Epoch 57 avg masked loss: 0.249515
Finetune ep58/60: 100%|███████████████████████████████████████| 188/188 [00:10<00:00, 18.39it/s, avg_masked_loss=0.249]
Epoch 58 avg masked loss: 0.248727
Finetune ep59/60: 100%|███████████████████████████████████████| 188/188 [00:10<00:00, 18.33it/s, avg_masked_loss=0.238]
Epoch 59 avg masked loss: 0.238493
Finetune ep60/60: 100%|███████████████████████████████████████| 188/188 [00:10<00:00, 18.35it/s, avg_masked_loss=0.237]
Epoch 60 avg masked loss: 0.236558 Saved finetuned weights to ./weights\dncnn_finetuned.pth Running patch-based batched inference ... Processing patches: 57
Noise reduction rate: 0.3278 Edge preservation index: 0.9389 Saved denoised image to images\denoised_dncnn.npy
In [16]:
run_experiment(model_name='tinynaf')
Loaded image: (1212, 140) dtype: float32 Using device: cuda AMP enabled: True No pretrained weights found — running self-supervised finetuning
Finetune ep1/60: 100%|████████████████████████████████████████| 188/188 [00:23<00:00, 8.00it/s, avg_masked_loss=0.583]
Epoch 1 avg masked loss: 0.583086
Finetune ep2/60: 100%|████████████████████████████████████████| 188/188 [00:23<00:00, 7.90it/s, avg_masked_loss=0.575]
Epoch 2 avg masked loss: 0.575002
Finetune ep3/60: 100%|████████████████████████████████████████| 188/188 [00:23<00:00, 7.97it/s, avg_masked_loss=0.571]
Epoch 3 avg masked loss: 0.571249
Finetune ep4/60: 100%|████████████████████████████████████████| 188/188 [00:22<00:00, 8.35it/s, avg_masked_loss=0.568]
Epoch 4 avg masked loss: 0.567818
Finetune ep5/60: 100%|████████████████████████████████████████| 188/188 [00:22<00:00, 8.35it/s, avg_masked_loss=0.564]
Epoch 5 avg masked loss: 0.563757
Finetune ep6/60: 100%|████████████████████████████████████████| 188/188 [00:22<00:00, 8.38it/s, avg_masked_loss=0.561]
Epoch 6 avg masked loss: 0.561141
Finetune ep7/60: 100%|████████████████████████████████████████| 188/188 [00:22<00:00, 8.36it/s, avg_masked_loss=0.559]
Epoch 7 avg masked loss: 0.559056
Finetune ep8/60: 100%|████████████████████████████████████████| 188/188 [00:22<00:00, 8.33it/s, avg_masked_loss=0.558]
Epoch 8 avg masked loss: 0.557725
Finetune ep9/60: 100%|████████████████████████████████████████| 188/188 [00:22<00:00, 8.31it/s, avg_masked_loss=0.555]
Epoch 9 avg masked loss: 0.555493
Finetune ep10/60: 100%|███████████████████████████████████████| 188/188 [00:22<00:00, 8.39it/s, avg_masked_loss=0.554]
Epoch 10 avg masked loss: 0.553784
Finetune ep11/60: 100%|████████████████████████████████████████| 188/188 [00:22<00:00, 8.29it/s, avg_masked_loss=0.55]
Epoch 11 avg masked loss: 0.550317
Finetune ep12/60: 100%|███████████████████████████████████████| 188/188 [00:23<00:00, 8.17it/s, avg_masked_loss=0.548]
Epoch 12 avg masked loss: 0.548100
Finetune ep13/60: 100%|███████████████████████████████████████| 188/188 [00:23<00:00, 8.13it/s, avg_masked_loss=0.546]
Epoch 13 avg masked loss: 0.545955
Finetune ep14/60: 100%|███████████████████████████████████████| 188/188 [00:22<00:00, 8.28it/s, avg_masked_loss=0.543]
Epoch 14 avg masked loss: 0.543287
Finetune ep15/60: 100%|███████████████████████████████████████| 188/188 [00:21<00:00, 8.71it/s, avg_masked_loss=0.541]
Epoch 15 avg masked loss: 0.541433
Finetune ep16/60: 100%|███████████████████████████████████████| 188/188 [00:20<00:00, 8.99it/s, avg_masked_loss=0.539]
Epoch 16 avg masked loss: 0.538775
Finetune ep17/60: 100%|███████████████████████████████████████| 188/188 [00:22<00:00, 8.36it/s, avg_masked_loss=0.536]
Epoch 17 avg masked loss: 0.535848
Finetune ep18/60: 100%|███████████████████████████████████████| 188/188 [00:22<00:00, 8.42it/s, avg_masked_loss=0.534]
Epoch 18 avg masked loss: 0.533939
Finetune ep19/60: 100%|███████████████████████████████████████| 188/188 [00:22<00:00, 8.45it/s, avg_masked_loss=0.531]
Epoch 19 avg masked loss: 0.531044
Finetune ep20/60: 100%|███████████████████████████████████████| 188/188 [00:21<00:00, 8.74it/s, avg_masked_loss=0.529]
Epoch 20 avg masked loss: 0.528835
Finetune ep21/60: 100%|███████████████████████████████████████| 188/188 [00:20<00:00, 9.01it/s, avg_masked_loss=0.526]
Epoch 21 avg masked loss: 0.525798
Finetune ep22/60: 100%|███████████████████████████████████████| 188/188 [00:21<00:00, 8.73it/s, avg_masked_loss=0.523]
Epoch 22 avg masked loss: 0.522559
Finetune ep23/60: 100%|███████████████████████████████████████| 188/188 [00:22<00:00, 8.45it/s, avg_masked_loss=0.521]
Epoch 23 avg masked loss: 0.520831
Finetune ep24/60: 100%|███████████████████████████████████████| 188/188 [00:22<00:00, 8.26it/s, avg_masked_loss=0.519]
Epoch 24 avg masked loss: 0.519301
Finetune ep25/60: 100%|███████████████████████████████████████| 188/188 [00:21<00:00, 8.74it/s, avg_masked_loss=0.517]
Epoch 25 avg masked loss: 0.517167
Finetune ep26/60: 100%|███████████████████████████████████████| 188/188 [00:21<00:00, 8.84it/s, avg_masked_loss=0.516]
Epoch 26 avg masked loss: 0.515731
Finetune ep27/60: 100%|███████████████████████████████████████| 188/188 [00:21<00:00, 8.78it/s, avg_masked_loss=0.514]
Epoch 27 avg masked loss: 0.513751
Finetune ep28/60: 100%|███████████████████████████████████████| 188/188 [00:21<00:00, 8.95it/s, avg_masked_loss=0.511]
Epoch 28 avg masked loss: 0.511412
Finetune ep29/60: 100%|████████████████████████████████████████| 188/188 [00:21<00:00, 8.88it/s, avg_masked_loss=0.51]
Epoch 29 avg masked loss: 0.509895
Finetune ep30/60: 100%|███████████████████████████████████████| 188/188 [00:21<00:00, 8.86it/s, avg_masked_loss=0.507]
Epoch 30 avg masked loss: 0.507253
Finetune ep31/60: 100%|███████████████████████████████████████| 188/188 [00:21<00:00, 8.85it/s, avg_masked_loss=0.505]
Epoch 31 avg masked loss: 0.505422
Finetune ep32/60: 100%|███████████████████████████████████████| 188/188 [00:21<00:00, 8.87it/s, avg_masked_loss=0.504]
Epoch 32 avg masked loss: 0.503879
Finetune ep33/60: 100%|███████████████████████████████████████| 188/188 [00:21<00:00, 8.76it/s, avg_masked_loss=0.501]
Epoch 33 avg masked loss: 0.500942
Finetune ep34/60: 100%|███████████████████████████████████████| 188/188 [00:22<00:00, 8.52it/s, avg_masked_loss=0.499]
Epoch 34 avg masked loss: 0.499352
Finetune ep35/60: 100%|███████████████████████████████████████| 188/188 [00:22<00:00, 8.17it/s, avg_masked_loss=0.498]
Epoch 35 avg masked loss: 0.498164
Finetune ep36/60: 100%|███████████████████████████████████████| 188/188 [00:22<00:00, 8.32it/s, avg_masked_loss=0.497]
Epoch 36 avg masked loss: 0.496917
Finetune ep37/60: 100%|███████████████████████████████████████| 188/188 [00:22<00:00, 8.38it/s, avg_masked_loss=0.496]
Epoch 37 avg masked loss: 0.496002
Finetune ep38/60: 100%|███████████████████████████████████████| 188/188 [00:22<00:00, 8.38it/s, avg_masked_loss=0.495]
Epoch 38 avg masked loss: 0.494691
Finetune ep39/60: 100%|███████████████████████████████████████| 188/188 [00:24<00:00, 7.77it/s, avg_masked_loss=0.493]
Epoch 39 avg masked loss: 0.493416
Finetune ep40/60: 100%|███████████████████████████████████████| 188/188 [00:23<00:00, 7.84it/s, avg_masked_loss=0.492]
Epoch 40 avg masked loss: 0.491614
Finetune ep41/60: 100%|███████████████████████████████████████| 188/188 [00:23<00:00, 8.17it/s, avg_masked_loss=0.491]
Epoch 41 avg masked loss: 0.490874
Finetune ep42/60: 100%|███████████████████████████████████████| 188/188 [00:22<00:00, 8.43it/s, avg_masked_loss=0.489]
Epoch 42 avg masked loss: 0.489270
Finetune ep43/60: 100%|███████████████████████████████████████| 188/188 [00:22<00:00, 8.39it/s, avg_masked_loss=0.488]
Epoch 43 avg masked loss: 0.488306
Finetune ep44/60: 100%|███████████████████████████████████████| 188/188 [00:23<00:00, 7.97it/s, avg_masked_loss=0.486]
Epoch 44 avg masked loss: 0.486396
Finetune ep45/60: 100%|███████████████████████████████████████| 188/188 [00:24<00:00, 7.82it/s, avg_masked_loss=0.486]
Epoch 45 avg masked loss: 0.485965
Finetune ep46/60: 100%|███████████████████████████████████████| 188/188 [00:23<00:00, 8.03it/s, avg_masked_loss=0.485]
Epoch 46 avg masked loss: 0.484667
Finetune ep47/60: 100%|███████████████████████████████████████| 188/188 [00:22<00:00, 8.33it/s, avg_masked_loss=0.483]
Epoch 47 avg masked loss: 0.483183
Finetune ep48/60: 100%|███████████████████████████████████████| 188/188 [00:22<00:00, 8.34it/s, avg_masked_loss=0.482]
Epoch 48 avg masked loss: 0.481857
Finetune ep49/60: 100%|███████████████████████████████████████| 188/188 [00:22<00:00, 8.30it/s, avg_masked_loss=0.481]
Epoch 49 avg masked loss: 0.481065
Finetune ep50/60: 100%|████████████████████████████████████████| 188/188 [00:22<00:00, 8.35it/s, avg_masked_loss=0.48]
Epoch 50 avg masked loss: 0.479857
Finetune ep51/60: 100%|███████████████████████████████████████| 188/188 [00:22<00:00, 8.34it/s, avg_masked_loss=0.479]
Epoch 51 avg masked loss: 0.478891
Finetune ep52/60: 100%|███████████████████████████████████████| 188/188 [00:22<00:00, 8.37it/s, avg_masked_loss=0.477]
Epoch 52 avg masked loss: 0.476943
Finetune ep53/60: 100%|███████████████████████████████████████| 188/188 [00:22<00:00, 8.39it/s, avg_masked_loss=0.476]
Epoch 53 avg masked loss: 0.476014
Finetune ep54/60: 100%|███████████████████████████████████████| 188/188 [00:22<00:00, 8.30it/s, avg_masked_loss=0.475]
Epoch 54 avg masked loss: 0.474526
Finetune ep55/60: 100%|███████████████████████████████████████| 188/188 [00:22<00:00, 8.18it/s, avg_masked_loss=0.473]
Epoch 55 avg masked loss: 0.473371
Finetune ep56/60: 100%|███████████████████████████████████████| 188/188 [00:23<00:00, 8.14it/s, avg_masked_loss=0.473]
Epoch 56 avg masked loss: 0.472983
Finetune ep57/60: 100%|███████████████████████████████████████| 188/188 [00:22<00:00, 8.32it/s, avg_masked_loss=0.471]
Epoch 57 avg masked loss: 0.471001
Finetune ep58/60: 100%|███████████████████████████████████████| 188/188 [00:22<00:00, 8.38it/s, avg_masked_loss=0.471]
Epoch 58 avg masked loss: 0.470653
Finetune ep59/60: 100%|███████████████████████████████████████| 188/188 [00:21<00:00, 8.55it/s, avg_masked_loss=0.469]
Epoch 59 avg masked loss: 0.469469
Finetune ep60/60: 100%|███████████████████████████████████████| 188/188 [00:23<00:00, 8.08it/s, avg_masked_loss=0.467]
Epoch 60 avg masked loss: 0.467391 Saved finetuned weights to ./weights\tinynaf_finetuned.pth Running patch-based batched inference ... Processing patches: 57
Noise reduction rate: 0.2275 Edge preservation index: 0.6275 Saved denoised image to images\denoised_tinynaf.npy
In [17]:
run_experiment(model_name='nafnet')
Loaded image: (1212, 140) dtype: float32 Using device: cuda AMP enabled: True No pretrained weights found — running self-supervised finetuning
Finetune ep1/60: 100%|████████████████████████████████████████| 188/188 [01:09<00:00, 2.71it/s, avg_masked_loss=0.576]
Epoch 1 avg masked loss: 0.575674
Finetune ep2/60: 100%|█████████████████████████████████████████| 188/188 [01:09<00:00, 2.69it/s, avg_masked_loss=0.57]
Epoch 2 avg masked loss: 0.569864
Finetune ep3/60: 100%|████████████████████████████████████████| 188/188 [01:09<00:00, 2.72it/s, avg_masked_loss=0.564]
Epoch 3 avg masked loss: 0.564427
Finetune ep4/60: 100%|████████████████████████████████████████| 188/188 [01:07<00:00, 2.79it/s, avg_masked_loss=0.556]
Epoch 4 avg masked loss: 0.555598
Finetune ep5/60: 100%|████████████████████████████████████████| 188/188 [01:09<00:00, 2.71it/s, avg_masked_loss=0.549]
Epoch 5 avg masked loss: 0.549377
Finetune ep6/60: 100%|████████████████████████████████████████| 188/188 [01:12<00:00, 2.58it/s, avg_masked_loss=0.547]
Epoch 6 avg masked loss: 0.546901
Finetune ep7/60: 100%|████████████████████████████████████████| 188/188 [01:14<00:00, 2.54it/s, avg_masked_loss=0.543]
Epoch 7 avg masked loss: 0.542945
Finetune ep8/60: 100%|████████████████████████████████████████| 188/188 [01:12<00:00, 2.59it/s, avg_masked_loss=0.539]
Epoch 8 avg masked loss: 0.539350
Finetune ep9/60: 100%|████████████████████████████████████████| 188/188 [01:16<00:00, 2.47it/s, avg_masked_loss=0.535]
Epoch 9 avg masked loss: 0.534690
Finetune ep10/60: 100%|███████████████████████████████████████| 188/188 [01:11<00:00, 2.65it/s, avg_masked_loss=0.531]
Epoch 10 avg masked loss: 0.530638
Finetune ep11/60: 100%|███████████████████████████████████████| 188/188 [01:09<00:00, 2.70it/s, avg_masked_loss=0.525]
Epoch 11 avg masked loss: 0.525140
Finetune ep12/60: 100%|████████████████████████████████████████| 188/188 [01:11<00:00, 2.65it/s, avg_masked_loss=0.52]
Epoch 12 avg masked loss: 0.519850
Finetune ep13/60: 100%|███████████████████████████████████████| 188/188 [01:09<00:00, 2.69it/s, avg_masked_loss=0.517]
Epoch 13 avg masked loss: 0.516506
Finetune ep14/60: 100%|███████████████████████████████████████| 188/188 [01:12<00:00, 2.58it/s, avg_masked_loss=0.513]
Epoch 14 avg masked loss: 0.512850
Finetune ep15/60: 100%|████████████████████████████████████████| 188/188 [01:12<00:00, 2.60it/s, avg_masked_loss=0.51]
Epoch 15 avg masked loss: 0.509733
Finetune ep16/60: 100%|███████████████████████████████████████| 188/188 [01:15<00:00, 2.48it/s, avg_masked_loss=0.506]
Epoch 16 avg masked loss: 0.505895
Finetune ep17/60: 100%|███████████████████████████████████████| 188/188 [01:11<00:00, 2.61it/s, avg_masked_loss=0.503]
Epoch 17 avg masked loss: 0.502736
Finetune ep18/60: 100%|███████████████████████████████████████| 188/188 [01:12<00:00, 2.61it/s, avg_masked_loss=0.499]
Epoch 18 avg masked loss: 0.498975
Finetune ep19/60: 100%|███████████████████████████████████████| 188/188 [01:13<00:00, 2.58it/s, avg_masked_loss=0.495]
Epoch 19 avg masked loss: 0.495353
Finetune ep20/60: 100%|███████████████████████████████████████| 188/188 [01:12<00:00, 2.58it/s, avg_masked_loss=0.492]
Epoch 20 avg masked loss: 0.491932
Finetune ep21/60: 100%|███████████████████████████████████████| 188/188 [01:13<00:00, 2.55it/s, avg_masked_loss=0.488]
Epoch 21 avg masked loss: 0.487806
Finetune ep22/60: 100%|███████████████████████████████████████| 188/188 [01:15<00:00, 2.49it/s, avg_masked_loss=0.484]
Epoch 22 avg masked loss: 0.483849
Finetune ep23/60: 100%|████████████████████████████████████████| 188/188 [01:08<00:00, 2.75it/s, avg_masked_loss=0.48]
Epoch 23 avg masked loss: 0.480228
Finetune ep24/60: 100%|███████████████████████████████████████| 188/188 [01:06<00:00, 2.81it/s, avg_masked_loss=0.478]
Epoch 24 avg masked loss: 0.477912
Finetune ep25/60: 100%|███████████████████████████████████████| 188/188 [01:07<00:00, 2.80it/s, avg_masked_loss=0.475]
Epoch 25 avg masked loss: 0.474648
Finetune ep26/60: 100%|███████████████████████████████████████| 188/188 [01:07<00:00, 2.77it/s, avg_masked_loss=0.472]
Epoch 26 avg masked loss: 0.472176
Finetune ep27/60: 100%|████████████████████████████████████████| 188/188 [01:08<00:00, 2.75it/s, avg_masked_loss=0.47]
Epoch 27 avg masked loss: 0.469566
Finetune ep28/60: 100%|███████████████████████████████████████| 188/188 [01:08<00:00, 2.74it/s, avg_masked_loss=0.468]
Epoch 28 avg masked loss: 0.467748
Finetune ep29/60: 100%|███████████████████████████████████████| 188/188 [01:09<00:00, 2.70it/s, avg_masked_loss=0.467]
Epoch 29 avg masked loss: 0.466767
Finetune ep30/60: 100%|███████████████████████████████████████| 188/188 [01:10<00:00, 2.68it/s, avg_masked_loss=0.463]
Epoch 30 avg masked loss: 0.462549
Finetune ep31/60: 100%|███████████████████████████████████████| 188/188 [01:09<00:00, 2.69it/s, avg_masked_loss=0.457]
Epoch 31 avg masked loss: 0.457155
Finetune ep32/60: 100%|███████████████████████████████████████| 188/188 [01:09<00:00, 2.69it/s, avg_masked_loss=0.452]
Epoch 32 avg masked loss: 0.452185
Finetune ep33/60: 100%|███████████████████████████████████████| 188/188 [01:10<00:00, 2.67it/s, avg_masked_loss=0.447]
Epoch 33 avg masked loss: 0.446816
Finetune ep34/60: 100%|████████████████████████████████████████| 188/188 [01:11<00:00, 2.63it/s, avg_masked_loss=0.44]
Epoch 34 avg masked loss: 0.440123
Finetune ep35/60: 100%|███████████████████████████████████████| 188/188 [01:10<00:00, 2.68it/s, avg_masked_loss=0.435]
Epoch 35 avg masked loss: 0.434676
Finetune ep36/60: 100%|███████████████████████████████████████| 188/188 [01:11<00:00, 2.61it/s, avg_masked_loss=0.428]
Epoch 36 avg masked loss: 0.428065
Finetune ep37/60: 100%|███████████████████████████████████████| 188/188 [01:09<00:00, 2.70it/s, avg_masked_loss=0.423]
Epoch 37 avg masked loss: 0.422623
Finetune ep38/60: 100%|███████████████████████████████████████| 188/188 [01:09<00:00, 2.70it/s, avg_masked_loss=0.416]
Epoch 38 avg masked loss: 0.416175
Finetune ep39/60: 100%|████████████████████████████████████████| 188/188 [01:08<00:00, 2.74it/s, avg_masked_loss=0.41]
Epoch 39 avg masked loss: 0.410339
Finetune ep40/60: 100%|███████████████████████████████████████| 188/188 [01:07<00:00, 2.77it/s, avg_masked_loss=0.405]
Epoch 40 avg masked loss: 0.405193
Finetune ep41/60: 100%|█████████████████████████████████████████| 188/188 [01:10<00:00, 2.67it/s, avg_masked_loss=0.4]
Epoch 41 avg masked loss: 0.400149
Finetune ep42/60: 100%|███████████████████████████████████████| 188/188 [01:07<00:00, 2.77it/s, avg_masked_loss=0.395]
Epoch 42 avg masked loss: 0.394540
Finetune ep43/60: 100%|███████████████████████████████████████| 188/188 [01:13<00:00, 2.56it/s, avg_masked_loss=0.389]
Epoch 43 avg masked loss: 0.389316
Finetune ep44/60: 100%|███████████████████████████████████████| 188/188 [01:12<00:00, 2.60it/s, avg_masked_loss=0.384]
Epoch 44 avg masked loss: 0.383870
Finetune ep45/60: 100%|███████████████████████████████████████| 188/188 [01:12<00:00, 2.60it/s, avg_masked_loss=0.379]
Epoch 45 avg masked loss: 0.379256
Finetune ep46/60: 100%|███████████████████████████████████████| 188/188 [01:12<00:00, 2.59it/s, avg_masked_loss=0.374]
Epoch 46 avg masked loss: 0.374402
Finetune ep47/60: 100%|███████████████████████████████████████| 188/188 [01:09<00:00, 2.71it/s, avg_masked_loss=0.371]
Epoch 47 avg masked loss: 0.370524
Finetune ep48/60: 100%|███████████████████████████████████████| 188/188 [01:12<00:00, 2.60it/s, avg_masked_loss=0.366]
Epoch 48 avg masked loss: 0.366335
Finetune ep49/60: 100%|███████████████████████████████████████| 188/188 [01:09<00:00, 2.69it/s, avg_masked_loss=0.362]
Epoch 49 avg masked loss: 0.361983
Finetune ep50/60: 100%|███████████████████████████████████████| 188/188 [01:11<00:00, 2.64it/s, avg_masked_loss=0.358]
Epoch 50 avg masked loss: 0.358076
Finetune ep51/60: 100%|███████████████████████████████████████| 188/188 [01:10<00:00, 2.68it/s, avg_masked_loss=0.354]
Epoch 51 avg masked loss: 0.354187
Finetune ep52/60: 100%|███████████████████████████████████████| 188/188 [01:09<00:00, 2.70it/s, avg_masked_loss=0.351]
Epoch 52 avg masked loss: 0.351126
Finetune ep53/60: 100%|███████████████████████████████████████| 188/188 [01:11<00:00, 2.65it/s, avg_masked_loss=0.347]
Epoch 53 avg masked loss: 0.347191
Finetune ep54/60: 100%|███████████████████████████████████████| 188/188 [01:09<00:00, 2.69it/s, avg_masked_loss=0.343]
Epoch 54 avg masked loss: 0.343472
Finetune ep55/60: 100%|████████████████████████████████████████| 188/188 [01:11<00:00, 2.61it/s, avg_masked_loss=0.34]
Epoch 55 avg masked loss: 0.340244
Finetune ep56/60: 100%|███████████████████████████████████████| 188/188 [01:10<00:00, 2.66it/s, avg_masked_loss=0.337]
Epoch 56 avg masked loss: 0.336987
Finetune ep57/60: 100%|███████████████████████████████████████| 188/188 [01:10<00:00, 2.65it/s, avg_masked_loss=0.334]
Epoch 57 avg masked loss: 0.333639
Finetune ep58/60: 100%|███████████████████████████████████████| 188/188 [01:09<00:00, 2.72it/s, avg_masked_loss=0.331]
Epoch 58 avg masked loss: 0.331197
Finetune ep59/60: 100%|███████████████████████████████████████| 188/188 [01:07<00:00, 2.79it/s, avg_masked_loss=0.328]
Epoch 59 avg masked loss: 0.328341
Finetune ep60/60: 100%|███████████████████████████████████████| 188/188 [01:08<00:00, 2.74it/s, avg_masked_loss=0.325]
Epoch 60 avg masked loss: 0.324506 Saved finetuned weights to ./weights\nafnet_finetuned.pth Running patch-based batched inference ... Processing patches: 57
Noise reduction rate: 0.3131 Edge preservation index: 0.5233 Saved denoised image to images\denoised_nafnet.npy
Визуализация полученных результатов¶
In [18]:
initial_data_unfiltered = np.load('images\\initial_data_unfiltered.npy')
initial_data_filtered = np.load('images\\initial_data_filtered.npy')
baseline_median_data_filtered = np.load('images\\baseline_median_data_filtered.npy')
bm3d_data_filtered = np.load('images\\bm3d_data_filtered.npy')
denoised_dncnn_data_filtered = np.load('images\\denoised_dncnn.npy')
denoised_tinynafnet_data_filtered = np.load('images\\denoised_tinynaf.npy')
denoised_nafnet_data_filtered = np.load('images\\denoised_nafnet.npy')
print(" ############# INITIAL FILTERING ############# ")
print_results(initial_data_filtered, initial_data_unfiltered)
print("\n")
print(" ############# BASELINE MEDIAN FILTERING ############# ")
print_results(baseline_median_data_filtered, initial_data_unfiltered)
print("\n")
print(" ############# BM3D FILTERING ############# ")
print_results(bm3d_data_filtered, initial_data_unfiltered)
print("\n")
print(" ############# DnCNN FILTERING ############# ")
print_results(denoised_dncnn_data_filtered, initial_data_unfiltered)
print("\n")
print(" ############# TinyNAFNet FILTERING ############# ")
print_results(denoised_tinynafnet_data_filtered, initial_data_unfiltered)
print("\n")
print(" ############# NAFNet FILTERING ############# ")
print_results(denoised_nafnet_data_filtered, initial_data_unfiltered)
############# INITIAL FILTERING #############
############# BASELINE MEDIAN FILTERING #############
############# BM3D FILTERING #############
############# DnCNN FILTERING #############
############# TinyNAFNet FILTERING #############
############# NAFNet FILTERING #############